On using DistributedSampler in PyTorch

1. How to use this distributed sampler
While using DistributedSampler, I noticed that the loss converged in a suspiciously regular pattern: the data was being read in the same order every epoch, i.e. it was never actually reshuffled.
My first suspicion was the seed. Looking at the source code, I found that the shuffled order depends on self.epoch, which is passed to g.manual_seed(self.epoch):
def __iter__(self):
    # deterministically shuffle based on epoch
    g = torch.Generator()
    g.manual_seed(self.epoch)
    if self.shuffle:
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
    else:
        indices = list(range(len(self.dataset)))

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    indices = indices[self.rank:self.total_size:self.num_replicas]
    assert len(indices) == self.num_samples

    return iter(indices)
And self.epoch is initialized to 0 by default:
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
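To see the consequence concretely, here is a quick standalone sketch (an illustration, separate from the training script): seeding a torch.Generator with the same value gives back exactly the same permutation, so an epoch value stuck at 0 means the same data order every epoch.

import torch

# epoch never updated -> seed is 0 both times -> identical order;
# epoch bumped to 1 -> a different order
for epoch in (0, 0, 1):
    g = torch.Generator()
    g.manual_seed(epoch)   # plays the same role as g.manual_seed(self.epoch) above
    print(epoch, torch.randperm(8, generator=g).tolist())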
However, a set_epoch method is provided to change self.epoch:
def set_epoch(self, epoch):
    self.epoch = epoch
So this method has to be called once per epoch during training. The fix is simply to uncomment the following line in my code and pass it the current epoch:
# sampler.set_epoch(e)
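With that line uncommented, the epoch loop of the script would look roughly like this (a sketch of the fix, using the same variable names as the full code below):

e = 0
while e < 2:
    sampler.set_epoch(e)   # new epoch -> new seed -> new shuffle
    for data in rand_loader:
        print(data)
    e += 1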

The full code is as follows:
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler

torch.distributed.init_process_group(backend="nccl")

input_size = 5
output_size = 2
batch_size = 2
data_size = 16

local_rank = torch.distributed.get_rank()
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)

class RandomDataset(Dataset):
    def __init__(self, size, length, local_rank):
        self.len = length
        self.data = torch.stack([torch.ones(5),      torch.ones(5) * 2,
                                 torch.ones(5) * 3,  torch.ones(5) * 4,
                                 torch.ones(5) * 5,  torch.ones(5) * 6,
                                 torch.ones(5) * 7,  torch.ones(5) * 8,
                                 torch.ones(5) * 9,  torch.ones(5) * 10,
                                 torch.ones(5) * 11, torch.ones(5) * 12,
                                 torch.ones(5) * 13, torch.ones(5) * 14,
                                 torch.ones(5) * 15, torch.ones(5) * 16]).to('cuda')
        self.local_rank = local_rank

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return self.len

dataset = RandomDataset(input_size, data_size, local_rank)
sampler = DistributedSampler(dataset)
rand_loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler)

e = 0
while e < 2:
    t = 0
    # sampler.set_epoch(e)
    for data in rand_loader:
        print(data)
    e += 1
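With data_size = 16, two processes, and batch_size = 2, each rank is handed 8 of the 16 samples per epoch, i.e. 4 batches. As long as sampler.set_epoch(e) stays commented out, self.epoch remains 0, so both epochs print exactly the same batches in the same order; once it is called with the running epoch, the second epoch gets a different shuffle.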
Run it with:
CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 test.py
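(Side note: on newer PyTorch versions torch.distributed.launch is deprecated in favor of torchrun; the equivalent command would presumably be CUDA_VISIBLE_DEVICES=0,1 torchrun --nproc_per_node=2 test.py.)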
2. The difference between using and not using this sampler
When training a model on multiple GPUs, I tried switching from DP mode to DDP mode to speed up training (DP easily runs into load imbalance). That left me with a question about this sampler, so I wanted to test what difference it makes in DDP mode to use it versus not use it.
Experiment code:
The whole dataset contains 8 samples, batch_size is 4, and we run 2 epochs in total.
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler

torch.distributed.init_process_group(backend="nccl")

batch_size = 4
data_size = 8

local_rank = torch.distributed.get_rank()
print(local_rank)
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)

class RandomDataset(Dataset):
    def __init__(self, length, local_rank):
        self.len = length
        self.data = torch.stack([torch.ones(1),     torch.ones(1) * 2,
                                 torch.ones(1) * 3, torch.ones(1) * 4,
                                 torch.ones(1) * 5, torch.ones(1) * 6,
                                 torch.ones(1) * 7, torch.ones(1) * 8]).to('cuda')
        self.local_rank = local_rank

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return self.len

dataset = RandomDataset(data_size, local_rank)
sampler = DistributedSampler(dataset)
# rand_loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=None, shuffle=True)
rand_loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler)

# (loop reconstructed from the truncated original: run the 2 epochs described above)
epoch = 0
while epoch < 2:
    sampler.set_epoch(epoch)   # re-seed the shuffle each epoch (see section 1)
    for data in rand_loader:
        print(data)
    epoch += 1
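To make the expected difference concrete, here is a small single-process sketch of the sampler's subsampling rule applied to this experiment (an illustration based on the __iter__ source above, not output from the actual DDP run):

import torch

num_replicas, total_size, epoch = 2, 8, 0   # 2 GPUs, 8 samples, first epoch
g = torch.Generator()
g.manual_seed(epoch)
indices = torch.randperm(total_size, generator=g).tolist()

for rank in range(num_replicas):
    # the same subsampling rule as in __iter__ above: rank, rank+2, rank+4, ...
    print("rank", rank, "->", indices[rank:total_size:num_replicas])
# Each rank gets a disjoint half of the dataset (4 samples, i.e. one batch of 4),
# so together the two processes cover the whole dataset exactly once per epoch.

Without the sampler (the commented-out DataLoader with sampler=None, shuffle=True), there is no such split: every rank iterates over all 8 samples itself, so the two GPUs see duplicated data within each epoch.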