{"id":3913,"date":"2025-07-19T14:47:21","date_gmt":"2025-07-19T06:47:21","guid":{"rendered":"https:\/\/fushuling.com\/?p=3913"},"modified":"2025-07-19T14:47:22","modified_gmt":"2025-07-19T06:47:22","slug":"%e6%96%b0%e7%94%9flab-5-%e7%bb%8f%e5%85%b8%e6%b7%b1%e5%ba%a6%e5%ad%a6%e4%b9%a0","status":"publish","type":"post","link":"https:\/\/fushuling.com\/index.php\/2025\/07\/19\/%e6%96%b0%e7%94%9flab-5-%e7%bb%8f%e5%85%b8%e6%b7%b1%e5%ba%a6%e5%ad%a6%e4%b9%a0\/","title":{"rendered":"\u65b0\u751fLab-5 \u7ecf\u5178\u6df1\u5ea6\u5b66\u4e60"},"content":{"rendered":"\n<h1 class=\"wp-block-heading\">Task #0: \u5728CIFAR-10\u4e0a\u5b9e\u73b0LeNet-5\u6a21\u578b<\/h1>\n\n\n\n<h2 class=\"wp-block-heading\">\u76ee\u6807<\/h2>\n\n\n\n<ul class=\"wp-block-list\">\n<li>\u638c\u63e1\u56fe\u50cf\u5206\u7c7b\u4efb\u52a1\u548cLeNet-5\u6a21\u578b\u7684\u57fa\u672c\u6784\u6210<\/li>\n\n\n\n<li>\u638c\u63e1\u5377\u79ef\u795e\u7ecf\u7f51\u7edc\u6a21\u578bLeNet-5\u7684\u524d\u5411\u8ba1\u7b97\u548c\u53cd\u5411\u66f4\u65b0\u8fc7\u7a0b<\/li>\n\n\n\n<li>\u638c\u63e1\u635f\u5931\u51fd\u6570\u3001\u968f\u673a\u68af\u5ea6\u4e0b\u964d\u7b49\u6982\u5ff5<\/li>\n<\/ul>\n\n\n\n<h2 class=\"wp-block-heading\">\u5b9e\u9a8c\u5185\u5bb9<\/h2>\n\n\n\n<ul class=\"wp-block-list\">\n<li>\u5728CIFAR-10\u6570\u636e\u96c6\u4e0a\u5b9e\u73b0LeNet-5\u6a21\u578b\u8bad\u7ec3\u548c\u6d4b\u8bd5\u5168\u6d41\u7a0b<\/li>\n\n\n\n<li>\u5728\u5b9e\u9a8c\u73af\u5883\u4e2d\u8bad\u7ec3LeNet-5\u6a21\u578b\uff0c\u5b58\u50a8\u6700\u7ec8\u83b7\u5f97\u7684\u6a21\u578b\u53c2\u6570<\/li>\n\n\n\n<li>\u52a0\u8f7d\u6a21\u578b\u53c2\u6570\u548c\u6d4b\u8bd5\u96c6\uff0c\u8ba1\u7b97\u6a21\u578b\u7684\u5e73\u5747\u5206\u7c7b\u51c6\u786e\u5ea6<\/li>\n\n\n\n<li>Milestone\uff1a\u5206\u7c7b\u51c6\u786e\u5ea6\u8d85\u8fc765%<\/li>\n<\/ul>\n\n\n\n<h2 class=\"wp-block-heading\">\u5b9e\u9a8c\u8fc7\u7a0b<\/h2>\n\n\n\n<p>LeNet-5\u662f\u7531Yann 
LeCun\u57281998\u5e74\u63d0\u51fa\u7684\u7ecf\u5178\u5377\u79ef\u795e\u7ecf\u7f51\u7edc\uff0c\u6700\u521d\u7528\u4e8e\u624b\u5199\u6570\u5b57\u8bc6\u522b\u3002\u5176\u7ed3\u6784\u7b80\u6d01\u800c\u9ad8\u6548\uff0c\u4e3a\u73b0\u4ee3CNN\u7684\u53d1\u5c55\u5960\u5b9a\u4e86\u57fa\u7840\u3002\u539f\u59cb\u7684LeNet-5\u63a5\u653632&#215;32\u7684\u5355\u901a\u9053\uff08\u7070\u5ea6\uff09\u56fe\u50cf\uff0c\u800cCIFAR-10\u662f32&#215;32\u76843\u901a\u9053\uff08\u5f69\u8272\uff09\u56fe\u50cf\uff0c\u56e0\u6b64\u6211\u4eec\u9700\u8981\u5bf9\u8f93\u5165\u5c42\u8fdb\u884c\u5fae\u8c03\u3002<\/p>\n\n\n\n<p>\u6807\u51c6\u7684LeNet-5\u7ed3\u6784\u5305\u542b\uff1a<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li>\u4e24\u4e2a\u5377\u79ef\u5c42\uff08Convolutional Layer\uff09<\/li>\n\n\n\n<li>\u4e24\u4e2a\u4e0b\u91c7\u6837\u5c42\uff08Subsampling\/Pooling Layer\uff09<\/li>\n\n\n\n<li>\u4e09\u4e2a\u5168\u8fde\u63a5\u5c42\uff08Fully Connected Layer\uff09<\/li>\n<\/ul>\n\n\n\n<p>\u8fd9\u91cc\u6211\u662f\u5728\u672c\u673a\u8dd1\u7684\uff0c\u56e0\u4e3a\u8bad\u7ec3\u7684\u8981\u6c42\u4e5f\u4e0d\u662f\u5f88\u9ad8\uff0c\u5177\u4f53\u7684\u5b9e\u9a8c\u6d41\u7a0b\u5982\u4e0b\uff1a<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><strong>\u6570\u636e\u52a0\u8f7d\u4e0e\u9884\u5904\u7406<\/strong>\n<ul class=\"wp-block-list\">\n<li>\u4f7f\u7528<code>torchvision.datasets.CIFAR10<\/code>\u6a21\u5757\u81ea\u52a8\u4e0b\u8f7d\u5e76\u52a0\u8f7d\u6570\u636e\u96c6\u3002\u901a\u8fc7\u8bbe\u7f6e<code>download=True<\/code>\u53c2\u6570\uff0c\u5728\u672c\u5730\u9996\u6b21\u8fd0\u884c\u65f6\u4f1a\u81ea\u52a8\u4e0b\u8f7d\u6570\u636e\u3002<\/li>\n\n\n\n<li>\u9884\u5904\u7406\u6d41\u7a0b <code>transforms.Compose<\/code> \u5305\u542b\u4e24\u4e2a\u5173\u952e\u6b65\u9aa4\uff1a\n<ul class=\"wp-block-list\">\n<li><strong><code>transforms.ToTensor()<\/code><\/strong>\uff1a\u5c06PIL\u683c\u5f0f\u7684\u56fe\u50cf\u6570\u636e\u8f6c\u6362\u4e3aPyTorch\u5f20\u91cf\uff08Tensor\uff09\uff0c\u5e76\u5c06\u5176\u50cf\u7d20\u503c\u4ece 
<code>[0, 255]<\/code> \u7684\u8303\u56f4\u5f52\u4e00\u5316\u5230 <code>[0.0, 1.0]<\/code>\u3002<\/li>\n\n\n\n<li><strong><code>transforms.Normalize()<\/code><\/strong>\uff1a\u5bf9\u56fe\u50cf\u5f20\u91cf\u7684\u4e09\u4e2a\u901a\u9053\uff08R, G, B\uff09\u8fdb\u884c\u6807\u51c6\u5316\uff0c\u516c\u5f0f\u4e3a <code>output = (input - mean) \/ std<\/code>\u3002\u6b64\u64cd\u4f5c\u6709\u52a9\u4e8e\u6a21\u578b\u66f4\u5feb\u5730\u6536\u655b\u3002<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li>\u6700\u540e\uff0c\u4f7f\u7528<code>torch.utils.data.DataLoader<\/code>\u5c06\u6570\u636e\u96c6\u5c01\u88c5\u6210\u8fed\u4ee3\u5668\uff0c\u4ee5\u5b9e\u73b0\u6279\u91cf\uff08batch\uff09\u8bad\u7ec3\u548c\u6570\u636e\u6d17\u724c\uff08shuffle\uff09\u3002<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li><strong>\u6a21\u578b\u6784\u5efa (LeNet-5)<\/strong>\n<ul class=\"wp-block-list\">\n<li>\u6211\u4eec\u6784\u5efa\u4e86\u4e00\u4e2aLeNet-5\u7684\u53d8\u4f53\u4ee5\u9002\u5e94CIFAR-10\u7684\u8f93\u5165\u3002\u5173\u952e\u8c03\u6574\u5728\u4e8e\u7b2c\u4e00\u4e2a\u5377\u79ef\u5c42\uff1a\n<ul class=\"wp-block-list\">\n<li><strong>\u539f\u59cbLeNet-5<\/strong>\uff1a\u8f93\u5165\u4e3a\u5355\u901a\u9053\uff08\u7070\u5ea6\uff09\u56fe\u50cf\uff0c<code>in_channels=1<\/code>\u3002<\/li>\n\n\n\n<li><strong>\u672c\u6b21\u5b9e\u9a8c<\/strong>\uff1a\u8f93\u5165\u4e3aCIFAR-10\u7684\u4e09\u901a\u9053\uff08\u5f69\u8272\uff09\u56fe\u50cf\uff0c\u56e0\u6b64<strong><code>in_channels<\/code>\u8bbe\u7f6e\u4e3a3<\/strong>\u3002<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li>\u6a21\u578b\u7ed3\u6784\u5982\u4e0b\uff1a\n<ul class=\"wp-block-list\">\n<li><strong>\u8f93\u5165\u5c42<\/strong>\uff1a3x32x32 \u7684\u56fe\u50cf\u3002<\/li>\n\n\n\n<li><strong>C1 &#8211; \u5377\u79ef\u5c42<\/strong>\uff1a\u4f7f\u752816\u4e2a5&#215;5\u7684\u5377\u79ef\u6838\uff0c\u8f93\u51fa\u4e3a16x28x28\u3002\u540e\u63a5ReLU\u6fc0\u6d3b\u51fd\u6570\u3002<\/li>\n\n\n\n<li><strong>S2 &#8211; 
\u6c60\u5316\u5c42<\/strong>\uff1a2&#215;2\u7684\u6700\u5927\u6c60\u5316\uff0c\u8f93\u51fa\u4e3a16x14x14\u3002<\/li>\n\n\n\n<li><strong>C3 &#8211; \u5377\u79ef\u5c42<\/strong>\uff1a\u4f7f\u752832\u4e2a5&#215;5\u7684\u5377\u79ef\u6838\uff0c\u8f93\u51fa\u4e3a32x10x10\u3002\u540e\u63a5ReLU\u6fc0\u6d3b\u51fd\u6570\u3002<\/li>\n\n\n\n<li><strong>S4 &#8211; \u6c60\u5316\u5c42<\/strong>\uff1a2&#215;2\u7684\u6700\u5927\u6c60\u5316\uff0c\u8f93\u51fa\u4e3a32x5x5\u3002<\/li>\n\n\n\n<li><strong>\u5c55\u5e73 (Flatten)<\/strong>\uff1a\u5c0632x5x5\u7684\u7279\u5f81\u56fe\u5c55\u5e73\u4e3a\u957f\u5ea6\u4e3a800\u7684\u4e00\u7ef4\u5411\u91cf\u3002<\/li>\n\n\n\n<li><strong>F5 &#8211; \u5168\u8fde\u63a5\u5c42<\/strong>\uff1a800\u4e2a\u8f93\u5165\u8282\u70b9\uff0c120\u4e2a\u8f93\u51fa\u8282\u70b9\u3002\u540e\u63a5ReLU\u6fc0\u6d3b\u51fd\u6570\u3002<\/li>\n\n\n\n<li><strong>F6 &#8211; \u5168\u8fde\u63a5\u5c42<\/strong>\uff1a120\u4e2a\u8f93\u5165\u8282\u70b9\uff0c84\u4e2a\u8f93\u51fa\u8282\u70b9\u3002\u540e\u63a5ReLU\u6fc0\u6d3b\u51fd\u6570\u3002<\/li>\n\n\n\n<li><strong>\u8f93\u51fa\u5c42<\/strong>\uff1a84\u4e2a\u8f93\u5165\u8282\u70b9\uff0c10\u4e2a\u8f93\u51fa\u8282\u70b9\uff0c\u5bf9\u5e94CIFAR-10\u768410\u4e2a\u7c7b\u522b\u3002<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li><strong>\u5b9a\u4e49\u635f\u5931\u51fd\u6570\u4e0e\u4f18\u5316\u5668<\/strong>\n<ul class=\"wp-block-list\">\n<li><strong>\u635f\u5931\u51fd\u6570 (Loss Function)<\/strong>\uff1a\u9009\u7528<code>nn.CrossEntropyLoss<\/code>\uff08\u4ea4\u53c9\u71b5\u635f\u5931\u51fd\u6570\uff09\u3002\u5b83\u5185\u90e8\u96c6\u6210\u4e86Softmax\u64cd\u4f5c\u548c\u8d1f\u5bf9\u6570\u4f3c\u7136\u635f\u5931\uff0c\u662f\u591a\u5206\u7c7b\u4efb\u52a1\u7684\u6807\u51c6\u9009\u62e9\u3002<\/li>\n\n\n\n<li><strong>\u4f18\u5316\u5668 
(Optimizer)<\/strong>\uff1a\u9009\u7528<code>optim.SGD<\/code>\uff08\u968f\u673a\u68af\u5ea6\u4e0b\u964d\uff09\u3002\u4e3a\u4e86\u52a0\u901f\u6536\u655b\u5e76\u51cf\u5c11\u9707\u8361\uff0c\u8bbe\u7f6e\u4e86<code>momentum=0.9<\/code>\u3002\u5b66\u4e60\u7387\uff08<code>lr<\/code>\uff09\u8bbe\u7f6e\u4e3a<code>0.001<\/code>\u3002<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li><strong>\u6a21\u578b\u8bad\u7ec3<\/strong>\n<ul class=\"wp-block-list\">\n<li>\u8bad\u7ec3\u8fc7\u7a0b\u5728\u4e00\u4e2a\u5faa\u73af\u4e2d\u6267\u884c20\u4e2a\u5468\u671f\uff08Epoch\uff09\u3002\u5728\u6bcf\u4e2a\u5468\u671f\u5185\uff0c\u6a21\u578b\u904d\u5386\u6574\u4e2a\u8bad\u7ec3\u6570\u636e\u96c6\u3002\u5bf9\u4e8e\u6bcf\u4e00\u4e2a\u6279\u6b21\uff08batch\uff09\u7684\u6570\u636e\uff1a\n<ul class=\"wp-block-list\">\n<li><strong>\u524d\u5411\u4f20\u64ad<\/strong>\uff1a\u5c06\u8f93\u5165\u6570\u636e <code>inputs<\/code> \u9001\u5165\u7f51\u7edc <code>net<\/code>\uff0c\u5f97\u5230\u9884\u6d4b\u503c <code>outputs<\/code>\u3002<\/li>\n\n\n\n<li><strong>\u8ba1\u7b97\u635f\u5931<\/strong>\uff1a\u4f7f\u7528<code>criterion<\/code>\u6bd4\u8f83\u9884\u6d4b\u503c <code>outputs<\/code> \u548c\u771f\u5b9e\u6807\u7b7e <code>labels<\/code>\uff0c\u8ba1\u7b97\u51fa\u635f\u5931 
<code>loss<\/code>\u3002<\/li>\n\n\n\n<li><strong>\u53cd\u5411\u4f20\u64ad<\/strong>\uff1a\u8c03\u7528<code>loss.backward()<\/code>\uff0cPyTorch\u4f1a\u81ea\u52a8\u8ba1\u7b97\u635f\u5931\u76f8\u5bf9\u4e8e\u6a21\u578b\u5404\u53c2\u6570\u7684\u68af\u5ea6\u3002<\/li>\n\n\n\n<li><strong>\u53c2\u6570\u66f4\u65b0<\/strong>\uff1a\u8c03\u7528<code>optimizer.step()<\/code>\uff0c\u4f18\u5316\u5668\u6839\u636e\u8ba1\u7b97\u51fa\u7684\u68af\u5ea6\u6765\u66f4\u65b0\u6a21\u578b\u7684\u6743\u91cd\u3002<\/li>\n\n\n\n<li><strong>\u68af\u5ea6\u6e05\u96f6<\/strong>\uff1a\u5728\u4e0b\u4e00\u6b21\u8fed\u4ee3\u524d\uff0c\u8c03\u7528<code>optimizer.zero_grad()<\/code>\u6e05\u9664\u65e7\u7684\u68af\u5ea6\u3002<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li>\u8bad\u7ec3\u5b8c\u6210\u540e\uff0c\u4f7f\u7528<code>torch.save(net.state_dict(), PATH)<\/code>\u5c06\u6a21\u578b\u7684\u6743\u91cd\u53c2\u6570\u4fdd\u5b58\u5230\u78c1\u76d8\u3002<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li><strong>\u6a21\u578b\u8bc4\u4f30<\/strong>\n<ul class=\"wp-block-list\">\n<li>\u521b\u5efa\u4e00\u4e2a\u65b0\u7684LeNet-5\u5b9e\u4f8b\uff0c\u5e76\u4f7f\u7528<code>load_state_dict()<\/code>\u52a0\u8f7d\u5df2\u4fdd\u5b58\u7684\u6743\u91cd\u3002<\/li>\n\n\n\n<li>\u5c06\u6a21\u578b\u5207\u6362\u5230\u8bc4\u4f30\u6a21\u5f0f <code>net.eval()<\/code>\u3002\u8fd9\u4f1a\u5173\u95edDropout\u7b49\u53ea\u5728\u8bad\u7ec3\u65f6\u4f7f\u7528\u7684\u5c42\u3002<\/li>\n\n\n\n<li>\u4f7f\u7528<code>with torch.no_grad()<\/code>\u4e0a\u4e0b\u6587\u7ba1\u7406\u5668\uff0c\u7981\u6b62\u68af\u5ea6\u8ba1\u7b97\u4ee5\u8282\u7701\u5185\u5b58\u548c\u52a0\u901f\u8ba1\u7b97\u3002<\/li>\n\n\n\n<li>\u904d\u5386\u6d4b\u8bd5\u96c6\uff0c\u5c06\u56fe\u7247\u9001\u5165\u6a21\u578b\u8fdb\u884c\u9884\u6d4b\uff0c\u5e76\u5c06\u9884\u6d4b\u7ed3\u679c\u4e0e\u771f\u5b9e\u6807\u7b7e\u8fdb\u884c\u6bd4\u8f83\u3002<\/li>\n\n\n\n<li>\u8ba1\u7b97\u603b\u7684\u6b63\u786e\u6570\u91cf\uff0c\u6700\u7ec8\u901a\u8fc7\u516c\u5f0f <code>Accuracy = (Correct \/ Total) * 100%<\/code> 
\u8ba1\u7b97\u51fa\u6a21\u578b\u7684\u5e73\u5747\u5206\u7c7b\u51c6\u786e\u5ea6\u3002<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n\n\n\n<p>\u5b8c\u6574\u4ee3\u7801\u5982\u4e0b\uff1a<\/p>\n\n\n\n<pre class=\"wp-block-preformatted\">import torch<br>import torchvision<br>import torchvision.transforms as transforms<br>import torch.nn as nn<br>import torch.optim as optim<br>\u200b<br># \u52a0\u8f7d\u4e0e\u9884\u5904\u7406CIFAR-10\u6570\u636e\u96c6<br>print(\"\u6b63\u5728\u52a0\u8f7d\u548c\u9884\u5904\u7406\u6570\u636e...\")<br>\u200b<br># \u5b9a\u4e49\u6570\u636e\u9884\u5904\u7406\u6b65\u9aa4<br># ToTensor()\u5c06PIL\u56fe\u50cf\u6216numpy.ndarray\u8f6c\u6362\u4e3aFloatTensor\uff0c\u5e76\u5c06\u50cf\u7d20\u503c\u4ece[0, 255]\u7f29\u653e\u5230[0.0, 1.0]<br># Normalize()\u4f7f\u7528\u5747\u503c\u548c\u6807\u51c6\u5dee\u5bf9\u5f20\u91cf\u56fe\u50cf\u8fdb\u884c\u5f52\u4e00\u5316<br>transform = transforms.Compose(<br> &nbsp;  [transforms.ToTensor(),<br> &nbsp; &nbsp; transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])<br>\u200b<br># \u52a0\u8f7d\u8bad\u7ec3\u96c6<br>trainset = torchvision.datasets.CIFAR10(root='.\/data', train=True,<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  download=True, transform=transform)<br>trainloader = torch.utils.data.DataLoader(trainset, batch_size=64,<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  shuffle=True, num_workers=2)<br>\u200b<br># \u52a0\u8f7d\u6d4b\u8bd5\u96c6<br>testset = torchvision.datasets.CIFAR10(root='.\/data', train=False,<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; download=True, transform=transform)<br>testloader = torch.utils.data.DataLoader(testset, batch_size=64,<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; 
&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; shuffle=False, num_workers=2)<br>\u200b<br># CIFAR-10\u7684\u7c7b\u522b<br>classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')<br>\u200b<br>print(\"\u6570\u636e\u52a0\u8f7d\u5b8c\u6bd5\uff01\")<br>\u200b<br># \u6784\u5efaLeNet-5\u6a21\u578b<br>class LeNet5(nn.Module):<br> &nbsp;  def __init__(self):<br> &nbsp; &nbsp; &nbsp;  super(LeNet5, self).__init__()<br> &nbsp; &nbsp; &nbsp;  # \u5377\u79ef\u5c42\u90e8\u5206<br> &nbsp; &nbsp; &nbsp;  self.conv_net = nn.Sequential(<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  # \u539f\u59cbLeNet-5\u8f93\u5165\u662f\u5355\u901a\u9053\uff0cCIFAR-10\u662f3\u901a\u9053\uff0c\u6545in_channels=3<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  # C1: \u5377\u79ef\u5c421, \u8f93\u51653x32x32, \u8f93\u51fa16x28x28<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=0),<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.ReLU(),<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  # S2: \u6c60\u5316\u5c421, \u8f93\u516516x28x28, \u8f93\u51fa16x14x14<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.MaxPool2d(kernel_size=2, stride=2),<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  # C3: \u5377\u79ef\u5c422, \u8f93\u516516x14x14, \u8f93\u51fa32x10x10<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0),<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.ReLU(),<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  # S4: \u6c60\u5316\u5c422, \u8f93\u516532x10x10, \u8f93\u51fa32x5x5<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.MaxPool2d(kernel_size=2, stride=2)<br> &nbsp; &nbsp; &nbsp;  )<br> &nbsp; &nbsp; &nbsp;  # \u5168\u8fde\u63a5\u5c42\u90e8\u5206<br> &nbsp; &nbsp; &nbsp;  self.fc_net = nn.Sequential(<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  # F5: \u5168\u8fde\u63a5\u5c421, \u8f93\u516532*5*5=800, \u8f93\u51fa120<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.Linear(32 * 5 * 5, 120),<br> &nbsp; 
&nbsp; &nbsp; &nbsp; &nbsp;  nn.ReLU(),<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  # F6: \u5168\u8fde\u63a5\u5c422, \u8f93\u5165120, \u8f93\u51fa84<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.Linear(120, 84),<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.ReLU(),<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  # Output: \u8f93\u51fa\u5c42, \u8f93\u516584, \u8f93\u51fa10 (\u5bf9\u5e9410\u4e2a\u7c7b\u522b)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  nn.Linear(84, 10)<br> &nbsp; &nbsp; &nbsp;  )<br>\u200b<br> &nbsp;  def forward(self, x):<br> &nbsp; &nbsp; &nbsp;  # \u524d\u5411\u8ba1\u7b97<br> &nbsp; &nbsp; &nbsp;  x = self.conv_net(x)<br> &nbsp; &nbsp; &nbsp;  # \u5c06\u5377\u79ef\u5c42\u7684\u8f93\u51fa\u5c55\u5e73 (flatten) \u4ee5\u8f93\u5165\u5230\u5168\u8fde\u63a5\u5c42<br> &nbsp; &nbsp; &nbsp;  x = x.view(-1, 32 * 5 * 5)<br> &nbsp; &nbsp; &nbsp;  x = self.fc_net(x)<br> &nbsp; &nbsp; &nbsp;  return x<br>\u200b<br># \u5b9e\u4f8b\u5316\u6a21\u578b<br>net = LeNet5()<br>print(\"LeNet-5 \u6a21\u578b\u5df2\u6784\u5efa\u3002\")<br>\u200b<br># \u68c0\u67e5\u662f\u5426\u6709\u53ef\u7528\u7684GPU\uff0c\u5e76\u79fb\u52a8\u6a21\u578b\u5230\u5bf9\u5e94\u8bbe\u5907<br>device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")<br>print(f\"\u4f7f\u7528\u8bbe\u5907: {device}\")<br>net.to(device)<br>\u200b<br>\u200b<br># \u5b9a\u4e49\u635f\u5931\u51fd\u6570\u548c\u4f18\u5316\u5668<br>criterion = nn.CrossEntropyLoss()<br>optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)<br>\u200b<br># \u8bad\u7ec3\u6a21\u578b<br>print(\"\u5f00\u59cb\u8bad\u7ec3\u6a21\u578b...\")<br>num_epochs = 20 # \u8bad\u7ec3\u5468\u671f<br>\u200b<br>for epoch in range(num_epochs):<br> &nbsp;  running_loss = 0.0<br> &nbsp;  for i, data in enumerate(trainloader, 0):<br> &nbsp; &nbsp; &nbsp;  # \u83b7\u53d6\u8f93\u5165\u6570\u636e\uff1bdata\u662f\u4e00\u4e2a[inputs, labels]\u7684\u5217\u8868<br> &nbsp; &nbsp; &nbsp;  inputs, labels = data[0].to(device), data[1].to(device)<br>\u200b<br> &nbsp; &nbsp; 
&nbsp;  # ---- \u524d\u5411\u8ba1\u7b97 ----<br> &nbsp; &nbsp; &nbsp;  outputs = net(inputs)<br> &nbsp; &nbsp; &nbsp;  loss = criterion(outputs, labels)<br>\u200b<br> &nbsp; &nbsp; &nbsp;  # ---- \u53cd\u5411\u66f4\u65b0 ----<br> &nbsp; &nbsp; &nbsp;  # \u68af\u5ea6\u6e05\u96f6<br> &nbsp; &nbsp; &nbsp;  optimizer.zero_grad()<br> &nbsp; &nbsp; &nbsp;  # \u53cd\u5411\u4f20\u64ad<br> &nbsp; &nbsp; &nbsp;  loss.backward()<br> &nbsp; &nbsp; &nbsp;  # \u66f4\u65b0\u53c2\u6570<br> &nbsp; &nbsp; &nbsp;  optimizer.step()<br>\u200b<br> &nbsp; &nbsp; &nbsp;  # \u6253\u5370\u7edf\u8ba1\u4fe1\u606f<br> &nbsp; &nbsp; &nbsp;  running_loss += loss.item()<br> &nbsp; &nbsp; &nbsp;  if i % 200 == 199: &nbsp;  # \u6bcf200\u4e2amini-batches\u6253\u5370\u4e00\u6b21<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  print(f'[Epoch: {epoch + 1}, Batch: {i + 1:5d}] loss: {running_loss \/ 200:.3f}')<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  running_loss = 0.0<br>\u200b<br>print(\"\u6a21\u578b\u8bad\u7ec3\u5b8c\u6210\uff01\")<br>\u200b<br># \u5b58\u50a8\u6a21\u578b\u53c2\u6570<br>MODEL_PATH = 'cifar_lenet5.pth'<br>torch.save(net.state_dict(), MODEL_PATH)<br>print(f\"\u6a21\u578b\u53c2\u6570\u5df2\u4fdd\u5b58\u81f3 {MODEL_PATH}\")<br>\u200b<br>\u200b<br># \u52a0\u8f7d\u6a21\u578b\u5e76\u8fdb\u884c\u6d4b\u8bd5<br>print(\"\\n\u5f00\u59cb\u52a0\u8f7d\u6a21\u578b\u5e76\u8fdb\u884c\u6d4b\u8bd5...\")<br>\u200b<br># \u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u6a21\u578b\u5b9e\u4f8b\u5e76\u52a0\u8f7d\u5df2\u4fdd\u5b58\u7684\u53c2\u6570<br>trained_net = LeNet5()<br>trained_net.load_state_dict(torch.load(MODEL_PATH))<br>trained_net.to(device)<br>\u200b<br># \u5c06\u6a21\u578b\u8bbe\u7f6e\u4e3a\u8bc4\u4f30\u6a21\u5f0f\uff08\u8fd9\u4f1a\u5173\u95eddropout\u7b49\u5c42\uff09<br>trained_net.eval()<br>\u200b<br>correct = 0<br>total = 0<br># \u5728\u6d4b\u8bd5\u65f6\uff0c\u6211\u4eec\u4e0d\u9700\u8981\u8ba1\u7b97\u68af\u5ea6<br>with torch.no_grad():<br> &nbsp;  for data in testloader:<br> &nbsp; &nbsp; &nbsp;  images, labels = 
data[0].to(device), data[1].to(device)<br> &nbsp; &nbsp; &nbsp;  # \u8fd0\u884c\u6a21\u578b\u5f97\u5230\u9884\u6d4b\u8f93\u51fa<br> &nbsp; &nbsp; &nbsp;  outputs = trained_net(images)<br> &nbsp; &nbsp; &nbsp;  # \u83b7\u53d6\u6700\u53ef\u80fd\u7684\u9884\u6d4b\u7c7b\u522b<br> &nbsp; &nbsp; &nbsp;  _, predicted = torch.max(outputs.data, 1)<br> &nbsp; &nbsp; &nbsp;  total += labels.size(0)<br> &nbsp; &nbsp; &nbsp;  correct += (predicted == labels).sum().item()<br>\u200b<br># \u8ba1\u7b97\u5e76\u6253\u5370\u5e73\u5747\u5206\u7c7b\u51c6\u786e\u5ea6<br>accuracy = 100 * correct \/ total<br>print(f'\u6a21\u578b\u572810000\u5f20\u6d4b\u8bd5\u56fe\u50cf\u4e0a\u7684\u5e73\u5747\u5206\u7c7b\u51c6\u786e\u5ea6: {accuracy:.2f} %')<br>\u200b<br># Milestone Check<br>if accuracy &gt; 65:<br> &nbsp;  print(\"\\n\u606d\u559c\uff01\u5206\u7c7b\u51c6\u786e\u5ea6\u5df2\u8d85\u8fc765%\uff0c\u8fbe\u5230Milestone\uff01\")<br>else:<br> &nbsp;  print(\"\\n\u51c6\u786e\u5ea6\u5c1a\u672a\u8fbe\u523065%\uff0c\u8bf7\u91cd\u65b0\u8bad\u7ec3\uff01\")<\/pre>\n\n\n\n<figure class=\"wp-block-image size-large\"><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/fushuling-1309926051.cos.ap-shanghai.myqcloud.com\/2025\/07\/QQ20250717-095443.png'><img class=\"lazyload lazyload-style-1\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  decoding=\"async\" data-original=\"https:\/\/fushuling-1309926051.cos.ap-shanghai.myqcloud.com\/2025\/07\/QQ20250717-095443.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"QQ20250717-095443\"\/><\/div><\/figure>\n\n\n\n<h1 class=\"wp-block-heading\">Task #1: 
\u5c06\u9884\u8bad\u7ec3\u6df1\u5ea6\u5377\u79ef\u6a21\u578b\u5728CIFAR-10\u4e0a\u505a\u8fc1\u79fb<\/h1>\n\n\n\n<h2 class=\"wp-block-heading\">\u76ee\u6807<\/h2>\n\n\n\n<ul class=\"wp-block-list\">\n<li>\u638c\u63e1\u6a21\u578b\u8fc1\u79fb\u5b66\u4e60\u6982\u5ff5<\/li>\n\n\n\n<li>\u638c\u63e1\u6a21\u578b\u8fc1\u79fb\u5b66\u4e60\u7684\u57fa\u672c\u65b9\u6cd5<\/li>\n<\/ul>\n\n\n\n<h2 class=\"wp-block-heading\">\u5b9e\u9a8c\u5185\u5bb9<\/h2>\n\n\n\n<ul class=\"wp-block-list\">\n<li>\u4ecetorchvision\u4e0a\u4e0b\u8f7d\u9884\u8bad\u7ec3\u7684\u6df1\u5ea6\u5377\u79ef\u795e\u7ecf\u7f51\u7edc\uff0c\u5982ResNet\u3001VGG\u7b49<\/li>\n\n\n\n<li>\u8c03\u6574\u7f51\u7edc\u7ed3\u6784\uff0c\u4f7f\u5176\u9002\u914dCIFAR-10\u6570\u636e\u96c6\u5206\u7c7b\u4efb\u52a1\u51bb\u7ed3\u7279\u5f81\u63d0\u53d6\u90e8\u5206\uff0c\u8bad\u7ec3\u5206\u7c7b\u5934<\/li>\n\n\n\n<li>\u52a0\u8f7d\u6d4b\u8bd5\u96c6\uff0c\u8ba1\u7b97\u8fc1\u79fb\u540e\u6a21\u578b\u7684\u5e73\u5747\u5206\u7c7b\u51c6\u786e\u5ea6<\/li>\n\n\n\n<li>Milestone\uff1a\u5206\u7c7b\u51c6\u786e\u5ea6\u8d85\u8fc790%<\/li>\n<\/ul>\n\n\n\n<h2 class=\"wp-block-heading\">\u5b9e\u9a8c\u8fc7\u7a0b<\/h2>\n\n\n\n<p>\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u7684\u6210\u529f\u5f80\u5f80\u4f9d\u8d56\u4e8e\u6d77\u91cf\u6807\u6ce8\u6570\u636e\u548c\u5f3a\u5927\u7684\u8ba1\u7b97\u8d44\u6e90\u3002\u8fc1\u79fb\u5b66\u4e60\uff08Transfer 
Learning\uff09\u4e3a\u5728\u6570\u636e\u6216\u8d44\u6e90\u6709\u9650\u7684\u573a\u666f\u4e0b\u6784\u5efa\u9ad8\u6027\u80fd\u6a21\u578b\u63d0\u4f9b\u4e86\u6709\u6548\u7684\u89e3\u51b3\u65b9\u6848\u3002\u5176\u6838\u5fc3\u601d\u60f3\u662f\uff0c\u5c06\u5728\u5927\u578b\u901a\u7528\u6570\u636e\u96c6\uff08\u5982ImageNet\uff09\u4e0a\u8bad\u7ec3\u597d\u7684\u6a21\u578b\u6240\u5b66\u5230\u7684\u77e5\u8bc6\u201c\u8fc1\u79fb\u201d\u5230\u65b0\u7684\u3001\u7279\u5b9a\u7684\u4efb\u52a1\u4e2d\u3002<\/p>\n\n\n\n<p>\u672c\u6b21\u5b9e\u9a8c\u6d89\u53ca\u4e24\u79cd\u4e3b\u8981\u7684\u8fc1\u79fb\u5b66\u4e60\u7b56\u7565\uff1a<\/p>\n\n\n\n<ol class=\"wp-block-list\">\n<li><strong>\u7279\u5f81\u63d0\u53d6 (Feature Extraction)<\/strong>\uff1a\u6b64\u7b56\u7565\u5047\u5b9a\u9884\u8bad\u7ec3\u6a21\u578b\uff08\u5982ResNet\uff09\u7684\u5377\u79ef\u57fa\u7840\u5c42\u5df2\u7ecf\u5b66\u4e60\u5230\u4e86\u8db3\u591f\u901a\u7528\u7684\u89c6\u89c9\u7279\u5f81\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u51bb\u7ed3\u8fd9\u90e8\u5206\u7f51\u7edc\uff0c\u4ec5\u66ff\u6362\u5e76\u8bad\u7ec3\u9876\u90e8\u7684\u5206\u7c7b\u5c42\uff0c\u4f7f\u5176\u9002\u5e94\u65b0\u6570\u636e\u96c6\u7684\u7c7b\u522b\u3002\u6b64\u65b9\u6cd5\u8bad\u7ec3\u901f\u5ea6\u5feb\uff0c\u8ba1\u7b97\u5f00\u9500\u5c0f\u3002<\/li>\n\n\n\n<li><strong>\u5fae\u8c03 
(Fine-tuning)<\/strong>\uff1a\u6b64\u7b56\u7565\u5728\u7279\u5f81\u63d0\u53d6\u7684\u57fa\u7840\u4e0a\u66f4\u8fdb\u4e00\u6b65\u3002\u5728\u521d\u6b65\u8bad\u7ec3\u597d\u65b0\u7684\u5206\u7c7b\u5934\u4e4b\u540e\uff0c\u89e3\u51bb\u4e00\u90e8\u5206\u751a\u81f3\u5168\u90e8\u9884\u8bad\u7ec3\u7684\u5377\u79ef\u5c42\uff0c\u5e76\u4f7f\u7528\u4e00\u4e2a\u975e\u5e38\u4f4e\u7684\u5b66\u4e60\u7387\u5bf9\u6574\u4e2a\u7f51\u7edc\u8fdb\u884c\u8bad\u7ec3\u3002\u8fd9\u4f7f\u5f97\u6a21\u578b\u80fd\u591f\u5728\u4e0d\u9057\u5fd8\u901a\u7528\u77e5\u8bc6\u7684\u524d\u63d0\u4e0b\uff0c\u5fae\u8c03\u5176\u7279\u5f81\u4ee5\u66f4\u597d\u5730\u9002\u5e94\u65b0\u6570\u636e\u96c6\u7684\u7279\u5b9a\u5206\u5e03\uff0c\u901a\u5e38\u80fd\u5e26\u6765\u66f4\u9ad8\u7684\u6027\u80fd\u3002<\/li>\n<\/ol>\n\n\n\n<p>\u5177\u4f53\u6d41\u7a0b\u5982\u4e0b\uff1a<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><strong>\u6570\u636e\u51c6\u5907<\/strong>\n<ul class=\"wp-block-list\">\n<li>\u52a0\u8f7dCIFAR-10\u6570\u636e\u96c6\u3002<\/li>\n\n\n\n<li>\u5b9a\u4e49\u6570\u636e\u53d8\u6362\uff08<code>transforms<\/code>\uff09\uff1a\u5c0632&#215;32\u7684\u56fe\u50cf\u7f29\u653e\u81f3ResNet-18\u6240\u9700\u7684224&#215;224\u5c3a\u5bf8\u3002\u5bf9\u8bad\u7ec3\u96c6\u8fdb\u884c\u968f\u673a\u88c1\u526a\u3001\u6c34\u5e73\u7ffb\u8f6c\u7b49\u6570\u636e\u589e\u5f3a\u64cd\u4f5c\u3002<\/li>\n\n\n\n<li>\u4f7f\u7528ImageNet\u7684\u5747\u503c\u548c\u6807\u51c6\u5dee\u5bf9\u6240\u6709\u56fe\u50cf\u8fdb\u884c\u5f52\u4e00\u5316\u3002<\/li>\n\n\n\n<li>\u521b\u5efa<code>DataLoader<\/code>\u4ee5\u8fdb\u884c\u6279\u91cf\u52a0\u8f7d\u3002<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li><strong>\u6a21\u578b\u6784\u5efa<\/strong>\n<ul 
class=\"wp-block-list\">\n<li>\u4ece<code>torchvision.models<\/code>\u52a0\u8f7d\u9884\u8bad\u7ec3\u7684<code>ResNet-18<\/code>\u6a21\u578b\u3002<\/li>\n\n\n\n<li>\u66ff\u6362\u5176\u539f\u6709\u7684\u5168\u8fde\u63a5\u5c42\uff08<code>model.fc<\/code>\uff09\uff0c\u4f7f\u5176\u8f93\u51fa\u7ef4\u5ea6\u4ece1000\uff08ImageNet\u7c7b\u522b\u6570\uff09\u53d8\u4e3a10\uff08CIFAR-10\u7c7b\u522b\u6570\uff09\u3002<\/li>\n<\/ul>\n<\/li>\n\n\n\n<li><strong>\u8bad\u7ec3\u7b56\u7565<\/strong>\n<ul class=\"wp-block-list\">\n<li><strong>\u65b9\u6cd5A\uff1a\u7279\u5f81\u63d0\u53d6\uff08\u521d\u59cb\u65b9\u6848\uff09<\/strong>\n<ol class=\"wp-block-list\">\n<li>\u51bb\u7ed3\u9664\u65b0\u66ff\u6362\u7684<code>model.fc<\/code>\u5c42\u5916\u7684\u6240\u6709\u7f51\u7edc\u5c42\u53c2\u6570\uff08<code>param.requires_grad = False<\/code>\uff09\u3002<\/li>\n\n\n\n<li>\u5b9a\u4e49\u4f18\u5316\u5668\uff0c\u4f7f\u5176\u53ea\u66f4\u65b0<code>model.fc<\/code>\u5c42\u7684\u53c2\u6570\u3002<\/li>\n\n\n\n<li>\u8fdb\u884c10\u4e2a\u5468\u671f\u7684\u8bad\u7ec3\u3002<\/li>\n\n\n\n<li>\u4f46\u4ec5\u4f7f\u7528\u7279\u5f81\u63d0\u53d6\u7684\u65b9\u6cd5\uff0c\u6700\u540e\u7684\u6d4b\u8bd5\u7387\u53ea\u6709<strong>78.18%<\/strong>\uff0c\u6548\u679c\u4e00\u822c\uff0c\u63d0\u4f9b\u4e86\u6709\u6548\u7684\u57fa\u7ebf\uff0c\u4f46\u672a\u8fbe\u523090%\u7684\u76ee\u6807\u3002<\/li>\n<\/ol>\n<\/li>\n\n\n\n<li><strong>\u65b9\u6cd5B\uff1a\u4e24\u9636\u6bb5\u5fae\u8c03\uff08\u4f18\u5316\u65b9\u6848\uff09<\/strong>\n<ul class=\"wp-block-list\">\n<li><strong>\u9636\u6bb5\u4e00\uff08\u7279\u5f81\u63d0\u53d6\uff09<\/strong>\uff1a\u540c\u65b9\u6cd5A\uff0c\u4f46\u53ea\u8bad\u7ec3\u8f83\u5c11\u5468\u671f\uff08\u4f8b\u59825\u4e2a\uff09\uff0c\u4ee5\u5feb\u901f\u8bad\u7ec3\u5206\u7c7b\u5934<\/li>\n\n\n\n<li><strong>\u9636\u6bb5\u4e8c\uff08\u5fae\u8c03\uff09<\/strong>\uff1a\n<ul class=\"wp-block-list\">\n<li>\u89e3\u51bb\u6a21\u578b\u7684\u6240\u6709\u5c42\uff08<code>param.requires_grad = 
True<\/code>\uff09\u3002<\/li>\n\n\n\n<li>\u91cd\u65b0\u5b9a\u4e49\u4e00\u4e2a\u4f18\u5316\u5668\uff0c\u5305\u542b\u6a21\u578b\u7684\u6240\u6709\u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4e00\u4e2a<strong>\u975e\u5e38\u4f4e<\/strong>\u7684\u5b66\u4e60\u7387\uff08\u4f8b\u5982<code>1e-4<\/code>\uff09\u3002<\/li>\n\n\n\n<li>\u7ee7\u7eed\u8fdb\u884c\u82e5\u5e72\u5468\u671f\uff08\u4f8b\u598210\u4e2a\uff09\u7684\u8bad\u7ec3\uff0c\u5fae\u8c03\u6574\u4e2a\u7f51\u7edc\u3002<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n\n\n\n<p>\u5b8c\u6574\u7684\u4ee3\u7801\u5982\u4e0b\uff1a<\/p>\n\n\n\n<pre class=\"wp-block-preformatted\">import torch<br>import torch.nn as nn<br>import torch.optim as optim<br>import torchvision<br>import torchvision.transforms as transforms<br>from torch.utils.data import DataLoader<br>import time<br>\u200b<br>def main():<br> &nbsp;  \"\"\"<br> &nbsp;  \u4e3b\u51fd\u6570\uff0c\u6267\u884c\u6574\u4e2a\u8fc1\u79fb\u5b66\u4e60\u6d41\u7a0b\u3002<br> &nbsp;  \"\"\"<br> &nbsp; &nbsp;<br> &nbsp;  # --- 1. \u8bbe\u5907\u914d\u7f6e ---<br> &nbsp;  device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")<br> &nbsp;  print(f\"\u4f7f\u7528\u7684\u8bbe\u5907: {device}\")<br>\u200b<br> &nbsp;  # --- 2. 
\u6570\u636e\u52a0\u8f7d\u4e0e\u9884\u5904\u7406 ---<br> &nbsp;  print(\"\u6b63\u5728\u51c6\u5907\u6570\u636e\u96c6...\")<br> &nbsp;  transform_train = transforms.Compose([<br> &nbsp; &nbsp; &nbsp;  transforms.Resize(224),<br> &nbsp; &nbsp; &nbsp;  transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),<br> &nbsp; &nbsp; &nbsp;  transforms.RandomHorizontalFlip(),<br> &nbsp; &nbsp; &nbsp;  transforms.ToTensor(),<br> &nbsp; &nbsp; &nbsp;  transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),<br> &nbsp;  ])<br>\u200b<br> &nbsp;  transform_test = transforms.Compose([<br> &nbsp; &nbsp; &nbsp;  transforms.Resize(256),<br> &nbsp; &nbsp; &nbsp;  transforms.CenterCrop(224),<br> &nbsp; &nbsp; &nbsp;  transforms.ToTensor(),<br> &nbsp; &nbsp; &nbsp;  transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),<br> &nbsp;  ])<br>\u200b<br> &nbsp;  try:<br> &nbsp; &nbsp; &nbsp;  trainset = torchvision.datasets.CIFAR10(root='.\/data', train=True, download=True, transform=transform_train)<br> &nbsp; &nbsp; &nbsp;  testset = torchvision.datasets.CIFAR10(root='.\/data', train=False, download=True, transform=transform_test)<br> &nbsp;  except Exception as e:<br> &nbsp; &nbsp; &nbsp;  print(f\"\u6570\u636e\u4e0b\u8f7d\u5931\u8d25\uff0c\u8bf7\u68c0\u67e5\u7f51\u7edc\u8fde\u63a5\u3002\u9519\u8bef\u4fe1\u606f: {e}\")<br> &nbsp; &nbsp; &nbsp;  return<br>\u200b<br> &nbsp;  trainloader = DataLoader(trainset, batch_size=64, shuffle=True, num_workers=4, pin_memory=True)<br> &nbsp;  testloader = DataLoader(testset, batch_size=64, shuffle=False, num_workers=4, pin_memory=True)<br> &nbsp; &nbsp;<br> &nbsp;  classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')<br> &nbsp;  print(\"\u6570\u636e\u96c6\u51c6\u5907\u5b8c\u6210\u3002\")<br>\u200b<br> &nbsp;  # --- 3. 
\u52a0\u8f7d\u9884\u8bad\u7ec3\u6a21\u578b\u5e76\u4fee\u6539\u7f51\u7edc\u7ed3\u6784 ---<br> &nbsp;  # \u3010\u91cd\u8981\u3011\u6211\u4eec\u4f7f\u7528 'ResNet18_Weights.DEFAULT' \u6765\u83b7\u53d6\u6700\u65b0\u7684\u9884\u8bad\u7ec3\u6743\u91cd<br> &nbsp;  print(\"\u6b63\u5728\u52a0\u8f7d\u9884\u8bad\u7ec3\u6a21\u578b ResNet-18...\")<br> &nbsp;  model = torchvision.models.resnet18(weights=torchvision.models.ResNet18_Weights.DEFAULT)<br>\u200b<br> &nbsp;  # \u83b7\u53d6\u6700\u540e\u4e00\u4e2a\u5168\u8fde\u63a5\u5c42(fc)\u7684\u8f93\u5165\u7279\u5f81\u6570<br> &nbsp;  num_ftrs = model.fc.in_features<br> &nbsp;  # \u66ff\u6362\u4e3a\u65b0\u7684\u5206\u7c7b\u5934<br> &nbsp;  model.fc = nn.Linear(num_ftrs, len(classes))<br> &nbsp;  model = model.to(device)<br> &nbsp;  print(\"\u6a21\u578b\u52a0\u8f7d\u548c\u4fee\u6539\u5b8c\u6210\u3002\")<br>\u200b<br> &nbsp;  # --- 4. \u4e24\u9636\u6bb5\u8bad\u7ec3 ---<br> &nbsp;  criterion = nn.CrossEntropyLoss()<br> &nbsp;  start_time = time.time()<br> &nbsp; &nbsp;<br> &nbsp;  # === \u9636\u6bb5\u4e00\uff1a\u53ea\u8bad\u7ec3\u5206\u7c7b\u5934 ===<br> &nbsp;  print(\"\\n--- \u5f00\u59cb\u9636\u6bb5\u4e00\uff1a\u7279\u5f81\u63d0\u53d6 (\u53ea\u8bad\u7ec3\u5206\u7c7b\u5934) ---\")<br> &nbsp;  # \u9996\u5148\u51bb\u7ed3\u6240\u6709\u5c42<br> &nbsp;  for param in model.parameters():<br> &nbsp; &nbsp; &nbsp;  param.requires_grad = False<br> &nbsp;  # \u7136\u540e\u89e3\u51bb\u6211\u4eec\u65b0\u52a0\u7684fc\u5c42<br> &nbsp;  for param in model.fc.parameters():<br> &nbsp; &nbsp; &nbsp;  param.requires_grad = True<br>\u200b<br> &nbsp;  # \u4e3a\u5206\u7c7b\u5934\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u4f18\u5316\u5668<br> &nbsp;  optimizer_head = optim.SGD(model.fc.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)<br> &nbsp;  num_epochs_head = 5 # \u5148\u8bad\u7ec35\u4e2aepoch<br>\u200b<br> &nbsp;  for epoch in range(num_epochs_head):<br> &nbsp; &nbsp; &nbsp;  model.train()<br> &nbsp; &nbsp; &nbsp;  running_loss = 0.0<br> &nbsp; &nbsp; 
&nbsp;  for i, data in enumerate(trainloader, 0):<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  inputs, labels = data[0].to(device), data[1].to(device)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  optimizer_head.zero_grad()<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  outputs = model(inputs)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  loss = criterion(outputs, labels)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  loss.backward()<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  optimizer_head.step()<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  running_loss += loss.item()<br> &nbsp; &nbsp; &nbsp;  print(f'\u9636\u6bb5\u4e00 - Epoch [{epoch + 1}\/{num_epochs_head}], Loss: {running_loss \/ len(trainloader):.4f}')<br>\u200b<br> &nbsp;  print(\"--- \u9636\u6bb5\u4e00\u5b8c\u6210 ---\")<br> &nbsp; &nbsp;<br> &nbsp;  # === \u9636\u6bb5\u4e8c\uff1a\u5fae\u8c03\u6574\u4e2a\u7f51\u7edc ===<br> &nbsp;  print(\"\\n--- \u5f00\u59cb\u9636\u6bb5\u4e8c\uff1a\u5fae\u8c03 (\u89e3\u51bb\u6240\u6709\u5c42) ---\")<br> &nbsp;  # \u89e3\u51bb\u6240\u6709\u5c42\uff0c\u8ba9\u5b83\u4eec\u90fd\u53ef\u4ee5\u88ab\u8bad\u7ec3<br> &nbsp;  for param in model.parameters():<br> &nbsp; &nbsp; &nbsp;  param.requires_grad = True<br>\u200b<br> &nbsp;  # \u4e3a\u6574\u4e2a\u7f51\u7edc\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u4f18\u5316\u5668\uff0c\u4f7f\u7528\u4e00\u4e2a\u975e\u5e38\u4f4e\u7684\u5b66\u4e60\u7387<br> &nbsp;  # \u8fd9\u662f\u5fae\u8c03\u7684\u5173\u952e\uff01<br> &nbsp;  optimizer_finetune = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9, weight_decay=5e-4)<br> &nbsp;  scheduler = optim.lr_scheduler.StepLR(optimizer_finetune, step_size=5, gamma=0.1) # \u5b66\u4e60\u7387\u8870\u51cf<br> &nbsp; &nbsp;<br> &nbsp;  num_epochs_finetune = 10 # \u5fae\u8c0310\u4e2aepoch<br>\u200b<br> &nbsp;  for epoch in range(num_epochs_finetune):<br> &nbsp; &nbsp; &nbsp;  model.train()<br> &nbsp; &nbsp; &nbsp;  running_loss = 0.0<br> &nbsp; &nbsp; &nbsp;  for i, data in enumerate(trainloader, 0):<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  inputs, 
labels = data[0].to(device), data[1].to(device)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  optimizer_finetune.zero_grad()<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  outputs = model(inputs)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  loss = criterion(outputs, labels)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  loss.backward()<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  optimizer_finetune.step()<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  running_loss += loss.item()<br> &nbsp; &nbsp; &nbsp; &nbsp;<br> &nbsp; &nbsp; &nbsp;  # \u6bcf\u4e2aepoch\u540e\u66f4\u65b0\u5b66\u4e60\u7387\u5e76\u6253\u5370<br> &nbsp; &nbsp; &nbsp;  scheduler.step()<br> &nbsp; &nbsp; &nbsp;  print(f'\u9636\u6bb5\u4e8c - Epoch [{epoch + 1}\/{num_epochs_finetune}], Loss: {running_loss \/ len(trainloader):.4f}, LR: {scheduler.get_last_lr()[0]}')<br>\u200b<br>\u200b<br> &nbsp;  end_time = time.time()<br> &nbsp;  print(f'\\n\u8bad\u7ec3\u5b8c\u6210\uff01\u603b\u8017\u65f6: {(end_time - start_time) \/ 60:.2f} \u5206\u949f')<br>\u200b<br> &nbsp;  # --- 5. \u6d4b\u8bd5\u6a21\u578b ---<br> &nbsp;  print(\"\\n\u5f00\u59cb\u5728\u6d4b\u8bd5\u96c6\u4e0a\u8bc4\u4f30\u6700\u7ec8\u6a21\u578b...\")<br> &nbsp;  model.eval()<br> &nbsp;  correct = 0<br> &nbsp;  total = 0<br> &nbsp;  with torch.no_grad():<br> &nbsp; &nbsp; &nbsp;  for data in testloader:<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  images, labels = data[0].to(device), data[1].to(device)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  outputs = model(images)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  _, predicted = torch.max(outputs.data, 1)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  total += labels.size(0)<br> &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;  correct += (predicted == labels).sum().item()<br>\u200b<br> &nbsp;  accuracy = 100 * correct \/ total<br> &nbsp;  print(f'\\n\u572810000\u5f20\u6d4b\u8bd5\u56fe\u50cf\u4e0a\u7684\u6700\u7ec8\u5e73\u5747\u5206\u7c7b\u51c6\u786e\u5ea6: {accuracy:.2f} %')<br>\u200b<br> &nbsp;  # --- 6. 
Milestone\u68c0\u67e5 ---<br> &nbsp;  if accuracy &gt; 90:<br> &nbsp; &nbsp; &nbsp;  print(\"\\n\ud83c\udf89 \u606d\u559c\uff01\u6210\u529f\u8fbe\u5230Milestone\uff1a\u5206\u7c7b\u51c6\u786e\u5ea6\u8d85\u8fc790%\uff01\")<br> &nbsp;  else:<br> &nbsp; &nbsp; &nbsp;  print(\"\\n- \u51c6\u786e\u5ea6\u672a\u8fbe\u523090%\u76ee\u6807\u3002\")<br> &nbsp; &nbsp; &nbsp; &nbsp;<br>if __name__ == '__main__':<br> &nbsp;  main()<\/pre>\n\n\n\n<p>\u8fd9\u91cc\u6211\u4f7f\u7528\u7684\u662f\u5b66\u59d0\u4e0a\u8bfe\u8bf4\u7684\u90a3\u4e2a\u7b97\u529b\u5e73\u53f0\uff0c\u56e0\u4e3a\u6211\u672c\u5730\u6709\u70b9\u5e26\u4e0d\u52a8\uff1a<\/p>\n\n\n\n<figure class=\"wp-block-image size-large\"><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/fushuling-1309926051.cos.ap-shanghai.myqcloud.com\/2025\/07\/14aaa636a926a5e5a81bab261a176f1.png'><img class=\"lazyload lazyload-style-1\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  decoding=\"async\" data-original=\"https:\/\/fushuling-1309926051.cos.ap-shanghai.myqcloud.com\/2025\/07\/14aaa636a926a5e5a81bab261a176f1.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"14aaa636a926a5e5a81bab261a176f1\"\/><\/div><\/figure>\n\n\n\n<p>\u6700\u540e\u7684\u5b9e\u9a8c\u7ed3\u679c\u5982\u4e0b\uff1a <\/p>\n\n\n\n<figure class=\"wp-block-image size-large\"><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/fushuling-1309926051.cos.ap-shanghai.myqcloud.com\/2025\/07\/b6ea5083c18c1157335cb4a6d343777.png'><img class=\"lazyload lazyload-style-1\" 
src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  decoding=\"async\" data-original=\"https:\/\/fushuling-1309926051.cos.ap-shanghai.myqcloud.com\/2025\/07\/b6ea5083c18c1157335cb4a6d343777.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\"\/><\/div><\/figure>\n","protected":false},"excerpt":{"rendered":"<p>Task #0: \u5728CIFAR-10\u4e0a\u5b9e\u73b0LeNet-5\u6a21\u578b \u76ee\u6807 \u5b9e\u9a8c\u5185\u5bb9 \u5b9e\u9a8c\u8fc7\u7a0b LeNet-5\u662f\u7531Ya [&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[15],"tags":[],"class_list":["post-3913","post","type-post","status-publish","format-standard","hentry","category-lab"],"_links":{"self":[{"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/posts\/3913","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/comments?post=3913"}],"version-history":[{"count":1,"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/posts\/3913\/revisions"}],"predecessor-version":[{"id":3914,"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/posts\/3913\/revisions\/3914"}],"wp:attachment":[{"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/media?parent=3913"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/fushu
ling.com\/index.php\/wp-json\/wp\/v2\/categories?post=3913"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/fushuling.com\/index.php\/wp-json\/wp\/v2\/tags?post=3913"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}