# CLASS torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None, multiprocessing_context=None, generator=None, *, prefetch_factor=2, persistent_workers=False)

Commonly used parameters:

dataset (Dataset) – dataset from which to load the data.

batch_size (int, optional) – how many samples per batch to load (default: 1).
The number of samples fetched per iteration, i.e. the batch size.

shuffle (bool, optional) – set to True to have the data reshuffled at every epoch (default: False).
Whether to shuffle the data between epochs; "shuffle" as in reshuffling a deck of cards, so setting it to True means the data is reshuffled every epoch.

num_workers (int, optional) – how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. (default: 0)
The number of worker subprocesses. DataLoader problems on Windows are very often multiprocessing problems; setting this to 0 (load in the main process) avoids them.

drop_last (bool, optional) – set to True to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If False and the size of the dataset is not divisible by the batch size, then the last batch will be smaller. (default: False)
In other words, when the dataset size is not divisible by batch_size, False keeps the last incomplete batch and True drops it.

```python
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

test_datas = torchvision.datasets.CIFAR10(root="./dataset", train=False,
                                          transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_loader = DataLoader(dataset=test_datas, batch_size=4, shuffle=True,
                         num_workers=0, drop_last=False)
# Take four samples from the dataset per batch, reshuffle after every epoch,
# load in the main process, and keep the last incomplete batch.

# img, target = test_datas[0]
# The DataLoader yields one batch per iteration; it is an iterable, so you
# loop over it rather than indexing it like test_loader[i].
# Each batch is a pair: imgs, the batched img values, and targets, the
# batched target values, i.e. the stacked version of what test_datas
# returns per sample (img, target).

# Example:
writer = SummaryWriter("dataloader")
step = 0
for data in test_loader:
    imgs, targets = data  # imgs is the batch of images, targets the batch of labels
    writer.add_images("dataloader", imgs, step)  # note add_images(), not add_image()
    step = step + 1
writer.close()
```
Run result:
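To make the drop_last behavior concrete, here is a minimal sketch. It is not part of the original script: it uses a synthetic `TensorDataset` of 10 random CIFAR-10-sized samples (a number chosen, as an assumption for illustration, so that batch_size=4 leaves an incomplete final batch).

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Synthetic stand-in for CIFAR-10: 10 fake 3x32x32 images and 10 fake labels.
# 10 is deliberately not divisible by batch_size=4, leaving a final batch of 2.
images = torch.randn(10, 3, 32, 32)
labels = torch.randint(0, 10, (10,))
dataset = TensorDataset(images, labels)

for drop_last in (False, True):
    loader = DataLoader(dataset, batch_size=4, drop_last=drop_last)
    sizes = [imgs.shape[0] for imgs, targets in loader]
    print(f"drop_last={drop_last}: batch sizes {sizes}")

# Prints:
# drop_last=False: batch sizes [4, 4, 2]   <- the incomplete batch is kept
# drop_last=True: batch sizes [4, 4]       <- the incomplete batch is dropped
```

The same stacking applies to the CIFAR-10 loader above: each `imgs` is a tensor of shape `[4, 3, 32, 32]` (batch, channels, height, width) and each `targets` a tensor of shape `[4]`, which is why `add_images()` can write a whole batch to TensorBoard at once.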